7.2 Model Information for Simulations
We first present the model specification for the three simulation studies. Set \(x^{*}\) as
the covariate matrix after being centered, including the constant.
\begin{align*}
\text{Minimize} \qquad &-\ln L=\sum_{i=1}^{n}{\ln D_{i}}+\frac{1}{2\sigma^{2}}\sum_{i=1}^{n}
{\left( y_{i}-\boldsymbol{x_{i}^{*}\beta} \right)^{2}} \\
\text{Subject to} \qquad
&g_{1}=\beta_{0}+\hat{y}_{\sim 0}^{\max }-b\le 0 \\
&g_{2}=a-\beta_{0}-\hat{y}_{\sim 0}^{\min }\le 0 \\
&g_{3}=\beta_{0}-b\le 0 \\
&g_{4}=-\beta_{0}+a\le 0 \\
&g_{5}=\beta_{1}-\min \left( \frac{b-\hat{y}_{\sim 1}^{\max }}{x_{1}^{*\max }},
-\frac{\hat{y}_{\sim 1}^{\min }-a}{x_{1}^{*\min }} \right)\le 0\\
&g_{6}=-\beta_{1}+\max \left( \frac{a-\hat{y}_{\sim 1}^{\min }}{x_{1}^{*\max }},
-\frac{\hat{y}_{\sim 1}^{\max }-b}{x_{1}^{*\min }} \right)\le 0\\
& \qquad \qquad \qquad \vdots \\
&g_{2m+3}=\beta_{m}-\min \left( \frac{b-\hat{y}_{\sim m}^{\max }}{x_{m}^{*\max }},
-\frac{\hat{y}_{\sim m}^{\min }-a}{x_{m}^{*\min }} \right)\le 0 \\
&g_{2m+4}=-\beta_{m}+\max \left( \frac{a-\hat{y}_{\sim m}^{\min }}{x_{m}^{*\max }},
-\frac{\hat{y}_{\sim m}^{\max }-b}{x_{m}^{*\min }} \right)\le 0 \\
&g_{2m+5}=\sigma -b+a\le 0\\
&g_{2m+6}=-\sigma +0.001\le 0,
\end{align*}
where
\begin{equation*}
D_{i}=\sqrt{2\pi }\,\sigma \left\{ \Phi \left( \frac{b-\boldsymbol{x_{i}^{*}\beta} }{\sigma }
\right)-\Phi \left( \frac{a-\boldsymbol{x_{i}^{*}\beta} }{\sigma } \right) \right\}.
\end{equation*}
We can re-specify the model by setting \(\boldsymbol{\gamma} =\left( \beta_{0}\cdots
\beta_{m}\ \sigma \right)^{T}\) and \(c\left( \boldsymbol{\gamma} \right)=\left( g_{1}\cdots
g_{2m+6} \right)^{T}\) and derive
\begin{align*}
\text{Minimize} \quad &f\left( \boldsymbol{\gamma} \right) \\
\text{Subject to} \quad &c\left( \boldsymbol{\gamma} \right)\le 0.
\end{align*}
Now, we add Lagrange multipliers and form a new objective function
\begin{equation*}
l\left( \boldsymbol{\gamma};\boldsymbol{\lambda} \right)=f\left( \boldsymbol{\gamma} \right)
+{{\boldsymbol{\lambda }}^{T}}c\left( \boldsymbol{\gamma} \right),
\end{equation*}
where
\(\boldsymbol{\lambda} =\left( \lambda_{1}\cdots \lambda_{2m+6} \right)^{T}\), and
\(\boldsymbol{d}=\left( d_{\beta_{0}}\cdots d_{\sigma } \right)^{T}\). To solve a COP,
we need to calculate
\(c\left( \boldsymbol{\gamma} \right)\),
\(\partial f\left( \boldsymbol{\gamma} \right)/\partial \boldsymbol{\gamma}\),
\(\partial c\left( \boldsymbol{\gamma} \right)/\partial \boldsymbol{\gamma}\), and
\(\partial^{2}l\left( \boldsymbol{\gamma} ;\boldsymbol{\lambda} \right)
/\partial \boldsymbol{\gamma}\, \partial \boldsymbol{\gamma}^{T}\). We have already determined
\(c\left( \boldsymbol{\gamma} \right)\). Regarding
\(\partial f\left( \boldsymbol{\gamma} \right)/\partial \boldsymbol{\gamma}\),
\begin{equation*}
\frac{\partial f\left( \boldsymbol{\gamma} \right)}{\partial \boldsymbol{\gamma} }=\left( \frac{\partial f}
{\partial \beta_{0}}\cdots \frac{\partial f}{\partial \beta_{m}}\ \frac{\partial f}{\partial \sigma }
\right)^{T}.
\end{equation*}
Specifically,
\begin{align*}
& \frac{\partial f}{\partial {{\beta }_{j}}}=\sum\limits_{i=1}^{n}{\frac{1}{{{D}_{i}}}\frac{\partial {{D}_{i}}}
{\partial {{\beta }_{j}}}}-\frac{1}{{{\sigma }^{2}}}\sum\limits_{i=1}^{n}{\left( {{y}_{i}}-
\boldsymbol{{{x}_{i}^{*}}\beta} \right)}{{x}_{ij}^{*}} \\
& \frac{\partial f}{\partial \sigma }=\sum\limits_{i=1}^{n}{\frac{1}{{{D}_{i}}}\frac{\partial {{D}_{i}}}
{\partial \sigma }}-\frac{1}{{{\sigma }^{3}}}\sum\limits_{i=1}^{n}{{{\left( {{y}_{i}}-\boldsymbol{{{x}_{i}^{*}}\beta}
\right)}^{2}}},
\end{align*}
where
\begin{align*}
& \frac{\partial {{D}_{i}}}{\partial {{\beta }_{j}}}=-{{x}^{*}_{ij}}\exp \left[ \frac{-{{\left( b-
\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}}{2{{\sigma }^{2}}} \right]+{{x}^{*}_{ij}}\exp \left[
\frac{-{{\left( a-\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}}{2{{\sigma }^{2}}} \right] \\
& \frac{\partial {{D}_{i}}}{\partial \sigma }=\frac{-\left( b-\boldsymbol{{{x}_{i}^{*}}\beta} \right)}
{\sigma }\exp \left[ \frac{-{{\left( b-\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}}{2{{\sigma }^{2}}} \right]
+\frac{\left( a-\boldsymbol{{{x}_{i}^{*}}\beta} \right)}{\sigma }\exp \left[ \frac{-{{\left( a-
\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}}{2{{\sigma }^{2}}} \right]+\frac{{{D}_{i}}}{\sigma }.
\end{align*}
Regarding
\(\partial c\left( \boldsymbol{\gamma} \right)/\partial \boldsymbol{\gamma}\),
\begin{equation*}
\frac{\partial c\left( \boldsymbol{\gamma} \right)}{\partial \boldsymbol{\gamma} }=
\left(
\begin{matrix}
\frac{\partial {{g}_{1}}}{\partial {{\beta }_{0}}} & \cdots & \frac{\partial {{g}_{1}}}
{\partial {{\beta }_{m}}} & \frac{\partial {{g}_{1}}}{\partial {{\sigma}}} \\
\vdots & \ddots & \vdots & \vdots \\
\frac{\partial {{g}_{2m+6}}}{\partial {{\beta }_{0}}} & \cdots & \frac{\partial {{g}_{2m+6}}}
{\partial {{\beta }_{m}}} & \frac{\partial {{g}_{2m+6}}}{\partial {{\sigma}}} \\
\end{matrix}
\right).
\end{equation*}
Specifically,\textsuperscript{19}
\begin{align*}
&\frac{\partial {{g}_{1}}}{\partial {{\beta }_{0}}}=1, &\frac{\partial {{g}_{1}}}{\partial
{{\beta }_{j}}}&=v_{j}^{+}x_{j}^{*\max }+v_{j}^{-}x_{j}^{*\min }, & \frac{\partial {{g}_{1}}}
{\partial \sigma }&=0, \\
&\frac{\partial {{g}_{2}}}{\partial {{\beta }_{0}}}=-1, & \frac{\partial {{g}_{2}}}{\partial
{{\beta }_{j}}}&=-v_{j}^{+}x_{j}^{*\min }-v_{j}^{-}x_{j}^{*\max }, & \frac{\partial {{g}_{2}}}
{\partial \sigma }&=0, \\
&\frac{\partial {{g}_{3}}}{\partial {{\beta}_{0}}}=1, & \frac{\partial {{g}_{3}}}{\partial
{{\beta}_{j}}}&=0, & \frac{\partial {{g}_{3}}}{\partial \sigma}&=0, \\
&\frac{\partial {{g}_{4}}}{\partial {{\beta}_{0}}}=-1, & \frac{\partial {{g}_{4}}}{\partial
{{\beta}_{j}}}&=0, & \frac{\partial {{g}_{4}}}{\partial \sigma}&=0.
\end{align*}
For the 5th to \((2m+4)\)th rows, if \(i=j\), then
\begin{equation*}
\frac{\partial {{g}_{2i+3}}}{\partial {{\beta }_{j}}}=1, \frac{\partial {{g}_{2i+4}}}{\partial {{\beta }_{j}}}=-1.
\end{equation*}
Otherwise,
\begin{align*}
&\frac{\partial g_{2i+3}}{\partial \beta_{j}}=
\begin{cases}
\frac{v_{j}^{+}x_{j}^{*\max }+v_{j}^{-}x_{j}^{*\min }}{x_{i}^{*\max }} & \text{if} \quad
\frac{b-\hat{y}_{\sim i}^{\max }}{x_{i}^{*\max }}\le -\frac{\hat{y}_{\sim i}^{\min }-a}{x_{i}^{*\min }} \\
\frac{v_{j}^{+}x_{j}^{*\min }+v_{j}^{-}x_{j}^{*\max }}{x_{i}^{*\min }} & \text{if} \quad
\frac{b-\hat{y}_{\sim i}^{\max }}{x_{i}^{*\max }}>-\frac{\hat{y}_{\sim i}^{\min }-a}{x_{i}^{*\min }}
\end{cases}\\
&\frac{\partial g_{2i+4}}{\partial \beta_{j}}=
\begin{cases}
-\frac{v_{j}^{+}x_{j}^{*\min }+v_{j}^{-}x_{j}^{*\max }}{x_{i}^{*\max }} & \text{if}
\quad \frac{a-\hat{y}_{\sim i}^{\min }}{x_{i}^{*\max }}\ge -\frac{\hat{y}_{\sim i}^{\max }-b}{x_{i}^{*\min }} \\
-\frac{v_{j}^{+}x_{j}^{*\max }+v_{j}^{-}x_{j}^{*\min }}{x_{i}^{*\min }} & \text{if} \quad
\frac{a-\hat{y}_{\sim i}^{\min }}{x_{i}^{*\max }}<-\frac{\hat{y}_{\sim i}^{\max }-b}{x_{i}^{*\min }}.
\end{cases}
\end{align*}
Also,
\begin{equation*}
\frac{\partial {{g}_{2i+3}}}{\partial \sigma }=0, \frac{\partial {{g}_{2i+4}}}{\partial \sigma }=0.
\end{equation*}
For the last two rows,
\begin{align*}
&\frac{\partial g_{2m+5}}{\partial \beta_{j}}=0, \quad \frac{\partial g_{2m+5}}{\partial \sigma }=1, \\
&\frac{\partial g_{2m+6}}{\partial \beta_{j}}=0, \quad \frac{\partial g_{2m+6}}{\partial \sigma }=-1.
\end{align*}
Regarding
\(\partial^{2}l\left( \boldsymbol{\gamma} ;\boldsymbol{\lambda} \right)/\partial
\boldsymbol{\gamma}\, \partial \boldsymbol{\gamma}^{T}\),
\begin{align*}
& \frac{{{\partial }^{2}}l}{\partial {{\beta }_{j}}\partial {{\beta }_{k}}}=\sum\limits_{i=1}^{n}
{\frac{-1}{{{D}_{i}}^{2}}\left( \frac{\partial {{D}_{i}}}{\partial {{\beta }_{j}}} \right)}\left(
\frac{\partial {{D}_{i}}}{\partial {{\beta }_{k}}} \right)+\sum\limits_{i=1}^{n}{\frac{1}{{{D}_{i}}}
\frac{{{\partial }^{2}}{{D}_{i}}}{\partial {{\beta }_{j}}\partial {{\beta }_{k}}}}
+\frac{1}{{{\sigma }^{2}}}\sum\limits_{i=1}^{n}{{{x}^{*}_{ik}}}{{x}^{*}_{ij}} \\
& \frac{{{\partial }^{2}}l}{\partial {{\beta }_{j}}\partial \sigma }=\sum\limits_{i=1}^{n}{\frac{-1}
{{{D}_{i}}^{2}}\left( \frac{\partial {{D}_{i}}}{\partial {{\beta }_{j}}} \right)\left(
\frac{\partial {{D}_{i}}}{\partial \sigma } \right)}+\sum\limits_{i=1}^{n}{\frac{1}{{{D}_{i}}}
\frac{{{\partial }^{2}}{{D}_{i}}}{\partial {{\beta }_{j}}\partial \sigma }}+\frac{2}{{{\sigma }^{3}}}
\sum\limits_{i=1}^{n}{\left( {{y}_{i}}-\boldsymbol{{{x}_{i}^{*}}\beta} \right)}{{x}_{ij}^{*}} \\
& \frac{{{\partial }^{2}}l}{\partial \sigma \partial \sigma }=\sum\limits_{i=1}^{n}{\frac{-1}{{{D}_{i}}^{2}}
\left( \frac{\partial {{D}_{i}}}{\partial \sigma } \right)}\left( \frac{\partial {{D}_{i}}}
{\partial \sigma } \right)+\sum\limits_{i=1}^{n}{\frac{1}{{{D}_{i}}}\frac{{{\partial }^{2}}{{D}_{i}}}
{\partial \sigma \partial \sigma }}+\frac{3}{{{\sigma }^{4}}}\sum\limits_{i=1}^{n}{{{\left( {{y}_{i}}
-\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}},
\end{align*}
where
\begin{align*}
& \frac{{{\partial }^{2}}{{D}_{i}}}{\partial {{\beta }_{j}}\partial {{\beta }_{k}}}=\frac{-{{x}^{*}_{ij}}
{{x}_{ik}^{*}}\left( b-\boldsymbol{{{x}_{i}^{*}}\beta} \right)}{{{\sigma }^{2}}}\exp \left[ \frac{-{{\left(
b-\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}}{2{{\sigma }^{2}}} \right]+\frac{{{x}^{*}_{ij}}{{x}_{ik}^{*}}\left(
a-\boldsymbol{{{x}_{i}^{*}}\beta} \right)}{{{\sigma }^{2}}}\exp \left[ \frac{-{{\left( a-\boldsymbol{{{x}_{i}^{*}}\beta}
\right)}^{2}}}{2{{\sigma }^{2}}} \right] \\
& \frac{{{\partial }^{2}}{{D}_{i}}}{\partial {{\beta }_{j}}\partial \sigma }=\frac{-{{x}^{*}_{ij}}{{\left(
b-\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}}{{{\sigma }^{3}}}\exp \left[ \frac{-{{\left( b-
\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}}{2{{\sigma }^{2}}} \right]+\frac{{{x}^{*}_{ij}}{{\left( a-
\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}}{{{\sigma }^{3}}}\exp \left[ \frac{-{{\left( a-
\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}}{2{{\sigma }^{2}}} \right] \\
& \frac{{{\partial }^{2}}{{D}_{i}}}{\partial \sigma \partial \sigma }=-\frac{{{\left( b-
\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{3}}}{{{\sigma }^{4}}}\exp \left[ \frac{-{{\left( b-
\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}}{2{{\sigma }^{2}}} \right]+\frac{{{\left( a-
\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{3}}}{{{\sigma }^{4}}}\exp \left[ \frac{-{{\left( a-
\boldsymbol{{{x}_{i}^{*}}\beta} \right)}^{2}}}{2{{\sigma }^{2}}} \right].
\end{align*}
Given the above information, we can implement the sequential quadratic programming algorithm to
solve the truncated regression model as a COP. As for the replication of Hellwig and Samuels's models, we
fixed the covariate matrix at the minimum, and the interaction terms are recalculated by using the covariate values
after being fixed. The new covariate matrix is marked as \(x^{\dagger }\). We use the notation as stated in the
appendix of the article. The model specification is
\begin{align*}
\text{Minimize} \qquad &-\ln L=\sum_{i=1}^{n}{\ln D_{i}}+\frac{1}{2\sigma^{2}}
\sum_{i=1}^{n}{\left( y_{i}-\boldsymbol{x_{i}^{\dagger}\beta} \right)^{2}} \\
\text{Subject to} \qquad
&g_{1}=\beta_{0}+\hat{y}_{\sim 0}^{\max }-b\le 0 \\
&g_{2}=a-\beta_{0}-\hat{y}_{\sim 0}^{\min }\le 0 \\
&g_{3}=\beta_{0}-b\le 0 \\
&g_{4}=-\beta_{0}+a\le 0 \\
&g_{5}=\beta_{1}-\frac{b-\hat{y}_{\sim 1}^{\max }}{x_{1}^{\dagger \max }}\le 0\\
&g_{6}=-\beta_{1}+\frac{a-\hat{y}_{\sim 1}^{\min }}{x_{1}^{\dagger \max }}\le 0\\
& \qquad \qquad \qquad \vdots \\
&g_{29}=\beta_{13}-\frac{b-\hat{y}_{\sim 13}^{\max }}{x_{13}^{\dagger \max }}\le 0 \\
&g_{30}=-\beta_{13}+\frac{a-\hat{y}_{\sim 13}^{\min }}{x_{13}^{\dagger \max }}\le 0 \\
&g_{31}=\sigma -b+a\le 0\\
&g_{32}=-\sigma +0.001\le 0.
\end{align*}
Notice that the definitions of
\(\hat{y}_{\sim j}^{\max }\) and
\(\hat{y}_{\sim j}^{\min }\) are different from
the simulation model due to the dummy variables (see the article's Appendix section). The rest of the model
information is the same except
\(c\left( \boldsymbol{\gamma} \right)\) and
\(\partial c\left( \boldsymbol{\gamma} \right)/\partial \boldsymbol{\gamma}\).
The difference in \(c\left( \boldsymbol{\gamma} \right)\) results from different centering methods.
Thus, there are some corresponding changes in \(\partial c\left( \boldsymbol{\gamma} \right)/
\partial \boldsymbol{\gamma}\), and they are all associated with the dummy variables. For the first two rows,
these changes are
\begin{align*}
&\frac{\partial g_{1}}{\partial \beta_{j}}=v_{j}^{+}x_{j}^{\dagger \max }&,
\frac{\partial g_{2}}{\partial \beta_{j}}&=-v_{j}^{-}x_{j}^{\dagger \max }\\
&\frac{\partial g_{1}}{\partial \beta_{k}}=
\begin{cases}
1 & \text{if} \quad \beta_{k}=\max \left( \beta_{10}\cdots \beta_{13} \right) \\
0 & \text{otherwise}
\end{cases}&,
\frac{\partial g_{2}}{\partial \beta_{k}}&=
\begin{cases}
-1 & \text{if} \quad \beta_{k}=\min \left( \beta_{10}\cdots \beta_{13} \right) \\
0 & \text{otherwise}
\end{cases}
\end{align*}
where
\(j=\left\{ 0\cdots 9 \right\}\) and
\(k=\left\{ 10\cdots 13 \right\}\).
From the 5th to 22nd rows, if \(i=j\),
\begin{equation*}
\frac{\partial {{g}_{2i+3}}}{\partial {{\beta }_{j}}}=1, \frac{\partial {{g}_{2i+4}}}{\partial {{\beta }_{j}}}=-1.
\end{equation*}
Otherwise,
\begin{align*}
&\frac{\partial g_{2i+3}}{\partial \beta_{j}}=\frac{v_{j}^{+}x_{j}^{\dagger \max }}
{x_{i}^{\dagger \max }}&,
\frac{\partial g_{2i+4}}{\partial \beta_{j}}&=-\frac{v_{j}^{-}x_{j}^{\dagger \max }}
{x_{i}^{\dagger \max }}\\
&\frac{\partial g_{2i+3}}{\partial \beta_{k}}=
\begin{cases}
\frac{1}{x_{i}^{\dagger \max }} & \text{if} \quad \beta_{k}=\max \left( \beta_{10}\cdots
\beta_{13} \right) \\
0 & \text{otherwise}
\end{cases}&,
\frac{\partial g_{2i+4}}{\partial \beta_{k}}&=
\begin{cases}
\frac{-1}{x_{i}^{\dagger \max }} & \text{if} \quad \beta_{k}=\min \left( \beta_{10}\cdots
\beta_{13} \right) \\
0 & \text{otherwise}
\end{cases}
\end{align*}
where
\(i=\left\{ 1\cdots 9 \right\}\),
\(j=\left\{ 0\cdots 9 \right\}\) and
\(k=\left\{ 10\cdots 13 \right\}\).
From the 23rd to 30th rows,
\begin{align*}
&\frac{\partial g_{2i+3}}{\partial \beta_{j}}=\frac{v_{j}^{+}x_{j}^{\dagger \max }}
{x_{i}^{\dagger \max }}&,
\frac{\partial g_{2i+4}}{\partial \beta_{j}}&=-\frac{v_{j}^{-}x_{j}^{\dagger \max }}
{x_{i}^{\dagger \max }}\\
&\frac{\partial g_{2i+3}}{\partial \beta_{k}}=
\begin{cases}
1 & \text{if} \quad i=k \\
0 & \text{otherwise}
\end{cases}&,
\frac{\partial g_{2i+4}}{\partial \beta_{k}}&=
\begin{cases}
-1 & \text{if} \quad i=k \\
0 & \text{otherwise}
\end{cases}
\end{align*}
where
\(i=\left\{ 10\cdots 13 \right\}\),
\(j=\left\{ 0\cdots 9 \right\}\) and
\(k=\left\{ 10\cdots 13 \right\}\).
____________________
Footnote
19 Here we set \(v_{0}^{+}=1\) and \(v_{0}^{-}=1\),
since every predicted value has the constant.